# Load libraries, install if needed
library(tidyverse)
#> Warning: package 'tidyr' was built under R version 4.0.5
library(tidylog) # verbose wrappers for dplyr/tidyr verbs; prints the "#> filter: ..." style messages seen below
library(RCurl)
library(RColorBrewer)
#> Warning: package 'RColorBrewer' was built under R version 4.0.5
library(sdmTMB) # install.packages("sdmTMB", dependencies = TRUE) should get you INLA as well, else do INLA manually first:
library(INLA) # install.packages("INLA",repos=c(getOption("repos"),INLA="https://inla.r-inla-download.org/R/stable"), dep=TRUE)
#> Warning: package 'INLA' was built under R version 4.0.4
library(devtools) # provides source_url() used below
library(patchwork)
library(sf)
#> Warning: package 'sf' was built under R version 4.0.5
library(ggeffects)
library(visreg)
library(boot)
# Source utm function and map plots
# NOTE(review): these remote scripts appear to define the mapping/plot helpers
# used later (e.g. theme_plot(), plot_map_east/west) -- requires network access;
# confirm the URLs/branches are stable before re-running.
source_url("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/R/functions/lon-lat-utm.R")
source_url("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/R/functions/map-plot.R")
# Filter litter categories that have enough data to work with. It is hard to
# fit models to e.g. glass and metal since they occur so rarely in the data;
# some years have none at all. Could consider pooling years for those
# categories. See the exploratory model-fitting script (does not exist yet).
# Read and make data long so that I can for loop through all categories
# Each row of the raw CSV is one haul; the eight litter columns are pivoted
# into (litter_category, density) pairs so the model loop can filter by category.
# NOTE(review): the _a/_b suffixes presumably distinguish the two density units
# (biomass vs. numbers, per the text below) -- confirm which is which.
# depth_sc is standardized using the mean/sd of depth within this dataset.
west <- readr::read_csv("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/data/west_coast_litter.csv") %>%
mutate(year_f = as.factor(year),
quarter_f = as.factor(quarter),
depth_sc = (depth - mean(depth)) / sd(depth)) %>%
pivot_longer(c("fishery_a", "fishery_b", "plast_a", "plast_b", "sup_a", "sup_b", "tot_a", "tot_b"),
names_to = "litter_category", values_to = "density")
#> Rows: 559 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (4): haul_id, ansse_area, trend_area, var
#> dbl (18): plast_a, sup_a, fishery_a, tot_a, plast_b, quarter, st_no, haul_no...
#> lgl (1): balse_area
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 9 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#>
#> new variable 'depth_sc' (double) with 350 unique values and 0% NA
#>
#> pivot_longer: reorganized (plast_a, sup_a, fishery_a, tot_a, plast_b, …) into (litter_category, density) [was 559x26, now 4472x20]
# East coast data: same structure and transformations as the west coast above;
# depth_sc is standardized within the east dataset separately.
east <- readr::read_csv("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/data/east_coast_litter.csv") %>%
mutate(year_f = as.factor(year),
quarter_f = as.factor(quarter),
depth_sc = (depth - mean(depth)) / sd(depth)) %>%
pivot_longer(c("fishery_a", "fishery_b", "plast_a", "plast_b", "sup_a", "sup_b", "tot_a", "tot_b"),
names_to = "litter_category", values_to = "density")
#> Rows: 546 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (4): haul_id, balse_area, trend_area, var
#> dbl (18): plast_a, sup_a, fishery_a, tot_a, plast_b, quarter, st_no, haul_no...
#> lgl (1): ansse_area
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 9 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#>
#> new variable 'depth_sc' (double) with 417 unique values and 0% NA
#>
#> pivot_longer: reorganized (plast_a, sup_a, fishery_a, tot_a, plast_b, …) into (litter_category, density) [was 546x26, now 4368x20]
# Load pred grids
# Prediction grids for the two coasts. Note that depth_sc is standardized
# using the mean/sd of the *survey data* (west$depth / east$depth), not of the
# grid itself, so grid covariates are on the same scale the models were fit on.
# X/Y are multiplied by 1000 -- presumably a km -> m UTM conversion to match
# the fitted data coordinates; TODO confirm units.
pred_grid_west <- read_csv("data/pred_grid_west.csv") %>%
mutate(year_f = as.factor(year),
quarter_f = as.factor(1),
depth_sc = (depth - mean(west$depth)) / sd(west$depth)) %>%
mutate(X = X*1000,
Y = Y*1000) %>%
drop_na(area)
#> Rows: 37512 Columns: 7
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (1): area
#> dbl (6): X, Y, year, lon, lat, depth
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 9 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with one unique value and 0% NA
#>
#> new variable 'depth_sc' (double) with 2,732 unique values and 0% NA
#>
#> mutate: changed 37,512 values (100%) of 'X' (0 new NA)
#>
#> changed 37,512 values (100%) of 'Y' (0 new NA)
#>
#> drop_na: removed 15,030 rows (40%), 22,482 rows remaining
# ggplot(pred_grid_west, aes(X*1000, Y*1000, color = area)) +
# geom_point()
# NOTE(review): unlike the west grid, the east grid is NOT filtered with
# drop_na(area). Verify whether that asymmetry is intentional (e.g. the east
# grid has no NA areas) or a missed step.
pred_grid_east <- read_csv("data/pred_grid_east.csv") %>%
mutate(year_f = as.factor(year),
quarter_f = as.factor(1),
depth_sc = (depth - mean(east$depth)) / sd(east$depth)) %>%
mutate(X = X*1000,
Y = Y*1000)
#> Rows: 121410 Columns: 7
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (1): area
#> dbl (6): X, Y, year, lon, lat, depth
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 9 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with one unique value and 0% NA
#>
#> new variable 'depth_sc' (double) with 6,366 unique values and 0% NA
#>
#> mutate: changed 121,410 values (100%) of 'X' (0 new NA)
#>
#> changed 121,410 values (100%) of 'Y' (0 new NA)
# For plastic, SUP, and fishery litter there is enough data to fit spatial
# models with both biomass density (Tweedie) and numbers (Poisson). For the
# other categories, only presence/absence models will be fit, because that is
# almost all the data amount to anyway.
# Build the SPDE meshes for each coast. Filtering to a single litter category
# keeps one row per haul, so the mesh is built from unique haul coordinates.
# Background reading on mesh construction:
# https://haakonbakkagit.github.io/btopic104.html
# https://haakonbakkagit.github.io/btopic114.html
# max.edge <- mean(c(diff(range(num$X)), diff(range(num$Y)))) / 15
# cutoff <- max.edge/5
west_hauls <- filter(west, litter_category == "fishery_a")
west_mesh <- make_mesh(west_hauls, c("X", "Y"), cutoff = 4)
#> filter: removed 3,913 rows (88%), 559 rows remaining
#mesh <- make_mesh(num %>% filter(litter_category == "sup"), c("X", "Y"), type = "kmeans", n_knots = 50)
plot(west_mesh)

east_hauls <- filter(east, litter_category == "fishery_a")
east_mesh <- make_mesh(east_hauls, c("X", "Y"), cutoff = 4)
#> filter: removed 3,822 rows (88%), 546 rows remaining
#mesh <- make_mesh(num %>% filter(litter_category == "sup"), c("X", "Y"), type = "kmeans", n_knots = 50)
plot(east_mesh)
# ---- East coast: fit one Tweedie spatial model per litter category ----
# Collects tidy coefficients, grid predictions, and simulation-based indices
# for every litter category, saving each fitted model and diagnostic plot.
data_list_coef <- list()
data_list_pred <- list()
data_list_sim <- list()
data_list_sims <- list()

# Loop-invariant quantities, hoisted out of the loop:
nsim <- 500 # number of simulation draws per model for index uncertainty
# Number of grid cells in one year, used to weight the index into a mean density
ncells <- filter(pred_grid_east, year == max(pred_grid_east$year)) %>% nrow()

for (i in unique(east$litter_category)) {
  dd <- east %>%
    filter(litter_category == i) %>%
    droplevels()

  # Spatial-only model (no spatiotemporal field): independent year intercepts
  # (0 + year_f) plus scaled depth as a covariate
  m <- sdmTMB(
    data = dd,
    formula = density ~ 0 + year_f + depth_sc,
    mesh = east_mesh,
    family = tweedie(),
    spatial = "on",
    time = "year",
    spatiotemporal = "off"
  )

  sanity(m) # print convergence / gradient / standard-error diagnostics

  # Tidy coefficients, computed once (the original called tidy() twice and
  # discarded the first result)
  data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>%
    mutate(model = paste("east", i, sep = "_"))

  # Plot residuals (MCMC-based residuals; QQ plot per category)
  mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
  qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
  qqline(mcmc_res)

  # Save model object
  saveRDS(m, paste0("output/models/east_", i, ".rds"))

  # Predict on grid
  pred <- predict(m, newdata = pred_grid_east) %>%
    mutate(model = i)
  data_list_pred[[i]] <- pred

  # Get sims (matrix of nsim draws per grid cell)
  sim <- predict(m, newdata = pred_grid_east, nsim = nsim)

  # Plot CV in space -- kept for reference, currently disabled
  # sim_last <- sim[pred_grid_east$year == max(pred_grid_east$year), ]
  # pred_last <- pred[pred$year == max(pred_grid_east$year), ]
  # pred_last$cv <- round(apply(exp(sim_last), 1, function(x) sd(x) / mean(x)), 2)
  # print(plot_map_east +
  #   geom_raster(data = pred_last, aes(X, Y, fill = cv)) +
  #   scale_fill_viridis_c() +
  #   geom_sf(size = 0.1) +
  #   NULL)
  # ggsave(paste0("figures/supp/cv_biomass_", i, ".png"))

  # Get index & full index (i.e. returning all sims).
  # area = 2*2 presumably means 2 x 2 km grid cells -- TODO confirm cell size
  index_sim <- get_index_sims(sim, area = rep(2 * 2, nrow(sim))) %>%
    mutate(model = paste("east", i, sep = "_"))
  data_list_sim[[i]] <- index_sim

  index_sim_full <- get_index_sims(sim,
    area = rep(2 * 2, nrow(sim)),
    return_sims = TRUE
  ) %>%
    mutate(model = paste("east", i, sep = "_"))
  data_list_sims[[i]] <- index_sim_full

  # See how mean index (area weights 1/ncells -> mean density) compares to
  # the raw annual mean of the data
  index_sim_avg <- get_index_sims(sim, area = rep(1 / ncells, nrow(sim)))

  print(ggplot(index_sim_avg, aes(year, y = est, ymin = lwr, ymax = upr)) +
    geom_line() +
    geom_line(
      data = dd %>%
        group_by(year) %>%
        summarise(mean_density = mean(density)),
      aes(year, mean_density, color = "Data (mean)"), linetype = 2,
      inherit.aes = FALSE
    ) + # Add data
    geom_ribbon(alpha = 0.2) +
    scale_color_brewer(palette = "Set1", name = "") +
    scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
    theme_plot() +
    xlab("Year") +
    ylab("Mean density") +
    NULL)

  ggsave(paste0("figures/supp/east_mean_pred_comp_", i, ".png"), dpi = 300)
}
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000323 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 3.23 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.699428 seconds (Warm-up)
#> Chain 1: 0.003199 seconds (Sampling)
#> Chain 1: 0.702627 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> group_by: one grouping variable (year)
#>
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000182 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.82 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.771777 seconds (Warm-up)
#> Chain 1: 0.002839 seconds (Sampling)
#> Chain 1: 0.774616 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000221 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.21 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.807185 seconds (Warm-up)
#> Chain 1: 0.002961 seconds (Sampling)
#> Chain 1: 0.810146 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000204 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.04 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.758691 seconds (Warm-up)
#> Chain 1: 0.002885 seconds (Sampling)
#> Chain 1: 0.761576 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✖ `ln_kappa` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✖ `ln_phi` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000211 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.11 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.855053 seconds (Warm-up)
#> Chain 1: 0.002769 seconds (Sampling)
#> Chain 1: 0.857822 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.00021 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.1 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.742645 seconds (Warm-up)
#> Chain 1: 0.003319 seconds (Sampling)
#> Chain 1: 0.745964 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✖ `thetaf` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✖ `ln_phi` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000216 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.16 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.975662 seconds (Warm-up)
#> Chain 1: 0.002979 seconds (Sampling)
#> Chain 1: 0.978641 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✖ `ln_phi` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000196 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.96 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.859607 seconds (Warm-up)
#> Chain 1: 0.002855 seconds (Sampling)
#> Chain 1: 0.862462 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
# Save predictions and sims as data frames
# Collapse each per-category list into one data frame, then write to disk.
dat_coef_biom <- data_list_coef %>% dplyr::bind_rows()
dat_pred_biom <- data_list_pred %>% dplyr::bind_rows()
dat_sim_biom <- data_list_sim %>% dplyr::bind_rows()
dat_sims_biom <- data_list_sims %>% dplyr::bind_rows()

dat_coef_biom %>% write_csv("output/east_dat_coef.csv")
dat_pred_biom %>% write_csv("output/east_dat_pred.csv")
dat_sim_biom %>% write_csv("output/east_dat_sim.csv")
dat_sims_biom %>% write_csv("output/east_dat_sims.csv")
# ---- West coast: fit one Tweedie spatial model per litter category ----
# Mirrors the east-coast loop: tidy coefficients, grid predictions, and
# simulation-based indices per category, with each model saved to disk.
data_list_coef <- list()
data_list_pred <- list()
data_list_sim <- list()
data_list_sims <- list()

# Loop-invariant quantities, hoisted out of the loop:
nsim <- 500 # number of simulation draws per model for index uncertainty
# Number of grid cells in one year, used to weight the index into a mean density
ncells <- filter(pred_grid_west, year == max(pred_grid_west$year)) %>% nrow()

for (i in unique(west$litter_category)) {
  dd <- west %>%
    filter(litter_category == i) %>%
    droplevels()

  # Spatial-only model (no spatiotemporal field): independent year intercepts
  # (0 + year_f) plus scaled depth as a covariate
  m <- sdmTMB(
    data = dd,
    formula = density ~ 0 + year_f + depth_sc,
    mesh = west_mesh,
    family = tweedie(),
    spatial = "on",
    time = "year",
    spatiotemporal = "off"
  )

  sanity(m) # print convergence / gradient / standard-error diagnostics

  # Tidy coefficients, computed once (the original called tidy() twice and
  # discarded the first result)
  data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>%
    mutate(model = paste("west", i, sep = "_"))

  # Plot residuals (MCMC-based residuals; QQ plot per category)
  mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
  qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
  qqline(mcmc_res)

  # Save model object
  saveRDS(m, paste0("output/models/west_", i, ".rds"))

  # Predict on grid
  pred <- predict(m, newdata = pred_grid_west) %>%
    mutate(model = i)
  data_list_pred[[i]] <- pred

  # Get sims (matrix of nsim draws per grid cell)
  sim <- predict(m, newdata = pred_grid_west, nsim = nsim)

  # Plot CV in space -- kept for reference, currently disabled
  # sim_last <- sim[pred_grid_west$year == max(pred_grid_west$year), ]
  # pred_last <- pred[pred$year == max(pred_grid_west$year), ]
  # pred_last$cv <- round(apply(exp(sim_last), 1, function(x) sd(x) / mean(x)), 2)
  # print(plot_map_west +
  #   geom_raster(data = pred_last, aes(X, Y, fill = cv)) +
  #   scale_fill_viridis_c() +
  #   geom_sf(size = 0.1) +
  #   NULL)
  # ggsave(paste0("figures/supp/cv_biomass_", i, ".png"))

  # Get index & full index (i.e. returning all sims).
  # area = 2*2 presumably means 2 x 2 km grid cells -- TODO confirm cell size
  index_sim <- get_index_sims(sim, area = rep(2 * 2, nrow(sim))) %>%
    mutate(model = paste("west", i, sep = "_"))
  data_list_sim[[i]] <- index_sim

  index_sim_full <- get_index_sims(sim,
    area = rep(2 * 2, nrow(sim)),
    return_sims = TRUE
  ) %>%
    mutate(model = paste("west", i, sep = "_"))
  data_list_sims[[i]] <- index_sim_full

  # See how mean index (area weights 1/ncells -> mean density) compares to
  # the raw annual mean of the data
  index_sim_avg <- get_index_sims(sim, area = rep(1 / ncells, nrow(sim)))

  print(ggplot(index_sim_avg, aes(year, y = est, ymin = lwr, ymax = upr)) +
    geom_line() +
    geom_line(
      data = dd %>%
        group_by(year) %>%
        summarise(mean_density = mean(density)),
      aes(year, mean_density, color = "Data (mean)"), linetype = 2,
      inherit.aes = FALSE
    ) + # Add data
    # NOTE(review): ribbon intentionally disabled here (east loop draws it);
    # confirm whether the west CIs should also be shown
    #geom_ribbon(alpha = 0.2) +
    scale_color_brewer(palette = "Set1", name = "") +
    scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
    theme_plot() +
    xlab("Year") +
    ylab("Mean density") +
    NULL)

  ggsave(paste0("figures/supp/west_mean_pred_comp_", i, ".png"), dpi = 300)
}
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000144 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.44 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.514421 seconds (Warm-up)
#> Chain 1: 0.002217 seconds (Sampling)
#> Chain 1: 0.516638 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> group_by: one grouping variable (year)
#>
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000139 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.39 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.553303 seconds (Warm-up)
#> Chain 1: 0.001917 seconds (Sampling)
#> Chain 1: 0.55522 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✖ `ln_phi` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000149 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.49 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.424811 seconds (Warm-up)
#> Chain 1: 0.002062 seconds (Sampling)
#> Chain 1: 0.426873 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000143 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.43 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.524793 seconds (Warm-up)
#> Chain 1: 0.001975 seconds (Sampling)
#> Chain 1: 0.526768 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000141 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.41 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.412124 seconds (Warm-up)
#> Chain 1: 0.001888 seconds (Sampling)
#> Chain 1: 0.414012 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000159 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.59 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.468001 seconds (Warm-up)
#> Chain 1: 0.001915 seconds (Sampling)
#> Chain 1: 0.469916 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000164 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.64 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.71309 seconds (Warm-up)
#> Chain 1: 0.001871 seconds (Sampling)
#> Chain 1: 0.714961 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000131 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.31 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.508603 seconds (Warm-up)
#> Chain 1: 0.001788 seconds (Sampling)
#> Chain 1: 0.510391 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
# Collapse each per-category result list into one data frame and write it
# out for the downstream plotting/summary scripts.
dat_coef_biom <- dplyr::bind_rows(data_list_coef)
write_csv(dat_coef_biom, "output/west_dat_coef.csv")

dat_pred_biom <- dplyr::bind_rows(data_list_pred)
write_csv(dat_pred_biom, "output/west_dat_pred.csv")

dat_sim_biom <- dplyr::bind_rows(data_list_sim)
write_csv(dat_sim_biom, "output/west_dat_sim.csv")

dat_sims_biom <- dplyr::bind_rows(data_list_sims)
write_csv(dat_sims_biom, "output/west_dat_sims.csv")